Exponential power distribution (exponpow)#
SciPy’s exponpow is a continuous distribution on \([0,\infty)\) with a tunable near-zero behavior and an extremely light (double-exponential) right tail.
A particularly useful generative story is: draw \(E\sim\mathrm{Exp}(1)\) and set \(X=\bigl(\log(1+E)\bigr)^{1/b}\); then \(X\sim\mathrm{exponpow}(b)\).
Equivalently, \(\exp(X^b)-1\sim\mathrm{Exp}(1)\).
Important: this is not the symmetric “exponential power / generalized normal” distribution (a common naming collision).
Learning goals#
write down the PDF/CDF and inverse CDF (PPF)
understand the link to the Gompertz and exponential distributions
compute moments, MGF/CF, and entropy via stable integral representations
implement NumPy-only sampling (inverse transform / exponential transform)
use `scipy.stats.exponpow` for `pdf`, `cdf`, `rvs`, and `fit`
import numpy as np
import plotly.graph_objects as go
import os
import plotly.io as pio
from plotly.subplots import make_subplots
from scipy import stats
from scipy.integrate import quad
from scipy.stats import exponpow as exponpow_sp
pio.templates.default = "plotly_white"
pio.renderers.default = os.environ.get("PLOTLY_RENDERER", "notebook")
np.set_printoptions(precision=6, suppress=True)
rng = np.random.default_rng(7)
TINY = np.finfo(float).tiny
1) Title & classification#
Name: `exponpow` (SciPy: `scipy.stats.exponpow`)
Type: continuous distribution
Standard support: \(x \in [0,\infty)\)
Parameter space (standard): shape parameter \(b>0\)
SciPy location–scale form: `b > 0` (shape), \(\mathrm{loc} \in \mathbb{R}\), `scale > 0`
Support with `loc`/`scale`: \(x \in [\mathrm{loc},\infty)\).
2) Intuition & motivation#
What it models#
exponpow is a distribution for a nonnegative quantity (think: a time, a positive magnitude, a delay) with an increasing hazard.
In the standardized form, the survival function is
$$ S(x) = \mathbb{P}(X>x) = \exp\bigl(1-\exp(x^b)\bigr),\qquad x\ge 0, $$
so the **hazard function** is
$$ h(x) = \frac{f(x)}{S(x)} = b\,x^{b-1}\,\exp(x^b). $$
This grows very quickly for large \(x\), which implies an extremely light right tail.
Typical real-world use cases#
This distribution is less common than Exponential/Gamma/Weibull, but it can be useful when:
you need a strictly nonnegative model
the event rate (hazard) should increase sharply with time (strong “aging”)
you want a very light tail (extremes are much rarer than under Weibull/Gamma)
Relations to other distributions (key for intuition)#
A clean way to understand exponpow is via transformations:
If \(X\sim\mathrm{exponpow}(b)\) (standard), then
$$ Y = X^b \sim \mathrm{Gompertz}(c=1) \quad\text{with density}\quad f_Y(y)=\exp\bigl(1+y-\exp(y)\bigr),\ y\ge 0. $$
If \(U=\exp(X^b)-1\), then
$$ U \sim \mathrm{Exp}(1). $$
Special case: \(b=1\) gives Gompertz.
$$ f(x;1) = \exp\bigl(1+x-\exp(x)\bigr),\quad x\ge 0. $$
These relationships give an immediate NumPy-only sampler and stable formulas for moments.
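As a quick Monte Carlo sanity check of these relations (a minimal sketch using SciPy's samplers and a KS distance; the NumPy-only helpers are defined in the next cell, and the seed here is arbitrary):
# Transform relations: exp(X^b) - 1 should look Exp(1), X^b should look Gompertz(c=1)
rng_rel = np.random.default_rng(123)  # separate generator so downstream cells are unaffected
b_rel = 2.5
x_rel = stats.exponpow.rvs(b_rel, size=100_000, random_state=rng_rel)
u_rel = np.expm1(x_rel**b_rel)   # candidate Exp(1) sample
y_rel = x_rel**b_rel             # candidate Gompertz(c=1) sample
print("KS distance vs Exp(1)     :", stats.kstest(u_rel, stats.expon.cdf).statistic)
print("KS distance vs Gompertz(1):", stats.kstest(y_rel, stats.gompertz(1.0).cdf).statistic)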
def _validate_exponpow_params(b: float, scale: float = 1.0) -> None:
if not (b > 0):
raise ValueError("b must be > 0")
if not (scale > 0):
raise ValueError("scale must be > 0")
def exponpow_logpdf(x, b: float, loc: float = 0.0, scale: float = 1.0):
"""Log-PDF of exponpow in SciPy's (b, loc, scale) parameterization (NumPy-only)."""
_validate_exponpow_params(float(b), float(scale))
x = np.asarray(x, dtype=float)
z = (x - loc) / scale
out = np.full_like(z, -np.inf, dtype=float)
mask = z >= 0
zz = z[mask]
zb = zz**b
with np.errstate(divide="ignore", invalid="ignore", over="ignore", under="ignore"):
if b == 1.0:
logz_term = 0.0
else:
logz_term = (b - 1.0) * np.log(zz)
logpdf = 1.0 + np.log(b) + logz_term + zb - np.exp(zb) - np.log(scale)
out[mask] = logpdf
return out
def exponpow_pdf(x, b: float, loc: float = 0.0, scale: float = 1.0):
"""PDF of exponpow in SciPy's (b, loc, scale) parameterization (NumPy-only)."""
return np.exp(exponpow_logpdf(x, b=b, loc=loc, scale=scale))
def exponpow_cdf(x, b: float, loc: float = 0.0, scale: float = 1.0):
"""CDF of exponpow in SciPy's (b, loc, scale) parameterization (NumPy-only)."""
_validate_exponpow_params(float(b), float(scale))
x = np.asarray(x, dtype=float)
z = (x - loc) / scale
out = np.zeros_like(z, dtype=float)
mask = z >= 0
zz = z[mask]
with np.errstate(over="ignore", under="ignore", invalid="ignore"):
out[mask] = -np.expm1(-np.expm1(zz**b))
return out
def exponpow_ppf(q, b: float, loc: float = 0.0, scale: float = 1.0):
"""Inverse CDF (PPF) for q in [0, 1] (NumPy-only)."""
_validate_exponpow_params(float(b), float(scale))
q = np.asarray(q, dtype=float)
if np.any((q < 0) | (q > 1)):
raise ValueError("q must be in [0, 1]")
z = np.empty_like(q, dtype=float)
z[q == 1.0] = np.inf
inner = q < 1.0
# Stable form: log(1 - log(1-q)) implemented as log1p(-log1p(-q)).
z[inner] = np.power(np.log1p(-np.log1p(-q[inner])), 1.0 / b)
return loc + scale * z
def sample_exponpow(size: int, b: float, loc: float = 0.0, scale: float = 1.0, rng=None):
"""Sample from exponpow(b, loc, scale) using a NumPy-only transform."""
_validate_exponpow_params(float(b), float(scale))
if rng is None:
rng = np.random.default_rng()
u = rng.random(size)
e = -np.log1p(-u) # Exp(1)
y = np.log1p(e) # Gompertz(c=1)
z = y ** (1.0 / b) # exponpow(b)
return loc + scale * z
def exponpow_raw_moment(k: int, b: float) -> float:
"""Raw moment E[X^k] for standard exponpow(b), using the Exp-transform integral."""
_validate_exponpow_params(float(b), 1.0)
if k < 0:
raise ValueError("k must be >= 0")
if k == 0:
return 1.0
power = k / b
def integrand(u):
return np.power(np.log1p(u), power) * np.exp(-u)
val, _ = quad(integrand, 0.0, np.inf, epsabs=1e-12, epsrel=1e-10, limit=200)
return float(val)
def exponpow_mgf(t: float, b: float) -> float:
"""MGF M(t)=E[e^{tX}] for standard exponpow(b), computed by quadrature."""
_validate_exponpow_params(float(b), 1.0)
def integrand(u):
x = np.power(np.log1p(u), 1.0 / b)
return np.exp(t * x - u)
val, _ = quad(integrand, 0.0, np.inf, epsabs=1e-12, epsrel=1e-10, limit=200)
return float(val)
def exponpow_cf(omega: float, b: float) -> complex:
"""Characteristic function φ(ω)=E[e^{iωX}] for standard exponpow(b), by quadrature."""
_validate_exponpow_params(float(b), 1.0)
def integrand_re(u):
x = np.power(np.log1p(u), 1.0 / b)
return np.cos(omega * x) * np.exp(-u)
def integrand_im(u):
x = np.power(np.log1p(u), 1.0 / b)
return np.sin(omega * x) * np.exp(-u)
re, _ = quad(integrand_re, 0.0, np.inf, epsabs=1e-12, epsrel=1e-10, limit=200)
im, _ = quad(integrand_im, 0.0, np.inf, epsabs=1e-12, epsrel=1e-10, limit=200)
return complex(re, im)
def exponpow_entropy(b: float) -> float:
"""Differential entropy of standard exponpow(b), computed by quadrature."""
_validate_exponpow_params(float(b), 1.0)
def integrand(u):
x = np.power(np.log1p(u), 1.0 / b)
if b == 1.0:
logpdf = np.log(b) + np.log1p(u) - u
else:
logpdf = np.log(b) + (b - 1.0) * np.log(x) + np.log1p(u) - u
return -logpdf * np.exp(-u)
h, _ = quad(integrand, 0.0, np.inf, epsabs=1e-12, epsrel=1e-10, limit=200)
return float(h)
3) Formal definition#
Standard form#
Support: \(x\ge 0\), shape parameter \(b>0\).
PDF
$$ f(x;b) = b\,x^{b-1}\,\exp\bigl(1 + x^b - \exp(x^b)\bigr),\qquad x\ge 0. $$
CDF
$$ F(x;b)=\begin{cases} 0, & x<0,\\ 1-\exp\bigl(-(\exp(x^b)-1)\bigr) = 1-\exp\bigl(1-\exp(x^b)\bigr), & x\ge 0. \end{cases} $$
Survival
$$ S(x;b)=\exp\bigl(1-\exp(x^b)\bigr),\qquad x\ge 0. $$
Inverse CDF (PPF) for \(q\in(0,1)\):
$$ F^{-1}(q;b) = \Bigl[\log\bigl(1-\log(1-q)\bigr)\Bigr]^{1/b}. $$
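A minimal round-trip check of the PPF formula (a sketch using the NumPy-only helpers defined earlier):
# CDF(PPF(q)) should recover q up to floating-point error
q_chk = np.linspace(0.001, 0.999, 9)
x_chk = exponpow_ppf(q_chk, b=1.7)
print("max |CDF(PPF(q)) - q|:", float(np.max(np.abs(exponpow_cdf(x_chk, b=1.7) - q_chk))))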
Location–scale form (SciPy)#
If \(Y\sim\mathrm{exponpow}(b)\) in standard form and \(X = \mathrm{loc}+\mathrm{scale}\,Y\) with \(\mathrm{scale}>0\), then
$$ f_X(x;b,\mathrm{loc},\mathrm{scale}) = \frac{1}{\mathrm{scale}}\, f_Y\!\left(\frac{x-\mathrm{loc}}{\mathrm{scale}};b\right), \qquad F_X(x;b,\mathrm{loc},\mathrm{scale}) = F_Y\!\left(\frac{x-\mathrm{loc}}{\mathrm{scale}};b\right), $$
with support \(x\ge \mathrm{loc}\).
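A small consistency check of this location–scale form against SciPy (a sketch that reuses the NumPy-only exponpow_pdf/exponpow_cdf defined earlier, with arbitrary parameter values):
# NumPy-only helpers vs SciPy with loc/scale (points below loc are included on purpose)
x_ls = np.linspace(-1.0, 5.0, 13)
b_ls, loc_ls, scale_ls = 2.0, -0.5, 1.5
pdf_d = np.abs(exponpow_pdf(x_ls, b_ls, loc_ls, scale_ls) - exponpow_sp.pdf(x_ls, b_ls, loc=loc_ls, scale=scale_ls))
cdf_d = np.abs(exponpow_cdf(x_ls, b_ls, loc_ls, scale_ls) - exponpow_sp.cdf(x_ls, b_ls, loc=loc_ls, scale=scale_ls))
print("max |pdf diff|:", float(np.max(pdf_d)))
print("max |cdf diff|:", float(np.max(cdf_d)))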
4) Moments & properties#
Existence of moments#
The right tail behaves like \(\exp(-\exp(x^b))\), so it is super-light: all positive moments exist, and the MGF exists for all real \(t\).
Near \(0\), the PDF behaves like \(f(x;b)\approx b\,x^{b-1}\), so the density:
diverges at 0 if \(0<b<1\)
is finite at 0 if \(b=1\)
is 0 at 0 if \(b>1\)
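A quick numerical illustration of this near-zero behavior (a sketch using the NumPy-only exponpow_pdf defined earlier):
# Density values close to 0: diverging for b < 1, finite for b = 1, vanishing for b > 1
x_near0 = np.array([1e-4, 1e-2, 1e-1])
for b_nz in [0.5, 1.0, 2.0]:
    print(f"b={b_nz}:", exponpow_pdf(x_near0, b=b_nz))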
A very useful transform (turns everything into an Exp integral)#
Let
$$ U = \exp(X^b)-1. $$
Then one can show \(U\sim\mathrm{Exp}(1)\) and
$$ X = \bigl(\log(1+U)\bigr)^{1/b}. $$
This gives raw moments (for integer \(k\ge 0\)):
$$ \mathbb{E}[X^k] = \int_{0}^{\infty} \bigl(\log(1+u)\bigr)^{k/b}\,e^{-u}\,du. $$
From raw moments \(m_k=\mathbb{E}[X^k]\):
mean \(\mu=m_1\)
variance \(\sigma^2=m_2-m_1^2\)
skewness \(\gamma_1 = \mu_3/\sigma^3\) where \(\mu_3=m_3-3m_2m_1+2m_1^3\)
excess kurtosis \(\gamma_2 = \mu_4/\sigma^4 - 3\) where \(\mu_4=m_4-4m_3m_1+6m_2m_1^2-3m_1^4\)
MGF / characteristic function#
Using the same transform,
$$ M(t)=\mathbb{E}[e^{tX}] = \int_{0}^{\infty} \exp\Bigl(t\,\bigl(\log(1+u)\bigr)^{1/b}\Bigr)\,e^{-u}\,du, $$
which is finite for all real \(t\).
The characteristic function is
$$ \varphi(\omega)=\mathbb{E}[e^{i\omega X}] = \int_{0}^{\infty} \exp\Bigl(i\omega\,\bigl(\log(1+u)\bigr)^{1/b}\Bigr)\,e^{-u}\,du. $$
Entropy#
The differential entropy is
$$ h(X) = -\int_0^{\infty} f(x)\,\log f(x)\,dx, $$
which can be evaluated stably via the same \(U\sim\mathrm{Exp}(1)\) transform:
$$ h(X) = -\int_0^{\infty} \log f\bigl((\log(1+u))^{1/b}\bigr)\,e^{-u}\,du. $$
For the location–scale family, entropy shifts by \(\log(\mathrm{scale})\):
$$ h(\mathrm{loc}+\mathrm{scale}\,Y) = h(Y) + \log(\mathrm{scale}). $$
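The scale shift is easy to verify numerically with SciPy's entropy method (a small sketch with arbitrary b and scale values):
# h(loc + scale*Y) - h(Y) should equal log(scale); loc does not matter
b_ent, scale_ent = 2.0, 3.0
h_std = float(exponpow_sp.entropy(b_ent))
h_scaled = float(exponpow_sp.entropy(b_ent, loc=1.0, scale=scale_ent))
print("entropy difference:", h_scaled - h_std)
print("log(scale)        :", float(np.log(scale_ent)))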
# Numerical moments/properties for one shape value, and cross-check against SciPy
b0 = 2.0
m1 = exponpow_raw_moment(1, b0)
m2 = exponpow_raw_moment(2, b0)
m3 = exponpow_raw_moment(3, b0)
m4 = exponpow_raw_moment(4, b0)
var = m2 - m1**2
mu3 = m3 - 3 * m2 * m1 + 2 * m1**3
mu4 = m4 - 4 * m3 * m1 + 6 * m2 * m1**2 - 3 * m1**4
skew = mu3 / (var ** 1.5)
kurt_excess = mu4 / (var**2) - 3
entropy_num = exponpow_entropy(b0)
mean_s, var_s, skew_s, kurt_excess_s = exponpow_sp.stats(b0, moments="mvsk")
entropy_s = exponpow_sp.entropy(b0)
print(f"b = {b0}")
print("mean (quad) :", m1)
print("mean (SciPy) :", float(mean_s))
print("var (quad) :", var)
print("var (SciPy) :", float(var_s))
print("skew (quad) :", skew)
print("skew (SciPy) :", float(skew_s))
print("kurt excess (quad) :", kurt_excess)
print("kurt excess (SciPy):", float(kurt_excess_s))
print("entropy (quad) :", entropy_num)
print("entropy (SciPy) :", float(entropy_s))
# MGF/CF checks (quadrature vs Monte Carlo)
t1, t2 = 1.0, -1.0
mgf_t1 = exponpow_mgf(t1, b0)
mgf_t2 = exponpow_mgf(t2, b0)
cf_w1 = exponpow_cf(1.0, b0)
n_mc = 200_000
x_mc = sample_exponpow(n_mc, b=b0, rng=rng)
mgf_mc_t1 = float(np.mean(np.exp(t1 * x_mc)))
mgf_mc_t2 = float(np.mean(np.exp(t2 * x_mc)))
cf_mc_w1 = complex(np.mean(np.exp(1j * 1.0 * x_mc)))
print("\nMGF/CF sanity checks")
print("M(1) quad / MC:", mgf_t1, mgf_mc_t1)
print("M(-1) quad / MC:", mgf_t2, mgf_mc_t2)
print("phi(1) quad / MC:", cf_w1, cf_mc_w1)
b = 2.0
mean (quad) : 0.7157241036182413
mean (SciPy) : 0.7157241036160373
var (quad) : 0.08408636982305884
var (SciPy) : 0.0840863698206612
skew (quad) : -0.07996816284010574
skew (SciPy) : -0.07996816537754364
kurt excess (quad) : -0.6442012956595629
kurt excess (SciPy): -0.6442012687791223
entropy (quad) : 0.15916045411593882
entropy (SciPy) : 0.1591604541159396
MGF/CF sanity checks
M(1) quad / MC: 2.1324299366788018 2.1329246561336936
M(-1) quad / MC: 0.5098943716313215 0.5095343059718397
phi(1) quad / MC: (0.723201242055198+0.6292690069604462j) (0.7230631556167272+0.6297777730873335j)
5) Parameter interpretation#
Shape parameter \(b\)#
A very interpretable view is:
$$ X = Y^{1/b},\qquad Y\sim\mathrm{Gompertz}(c=1). $$
So \(b\) simply controls a power transform of a fixed base distribution.
If \(b>1\), then \(1/b<1\) and you take a root: values are pulled toward 1, and the distribution becomes more concentrated (see the quantile check below).
If \(b=1\), you get the base Gompertz distribution.
If \(0<b<1\), then \(1/b>1\) and you take a power: small values shrink further toward 0 and large values expand, increasing spread.
Near \(0\), \(f(x;b)\approx b x^{b-1}\), so \(b\) also controls whether the density is spiky at 0.
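To make the concentration effect concrete, compare interquartile ranges across b (a sketch using the NumPy-only exponpow_ppf defined earlier):
# The IQR shrinks as b grows: the standardized distribution concentrates
for b_iqr in [0.5, 1.0, 2.0, 5.0]:
    q25 = float(exponpow_ppf(0.25, b=b_iqr))
    q75 = float(exponpow_ppf(0.75, b=b_iqr))
    print(f"b={b_iqr}: IQR = {q75 - q25:.4f}")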
loc and scale#
SciPy uses the standard location–scale family:
$$ X = \mathrm{loc} + \mathrm{scale}\,Y,\qquad Y\sim\mathrm{exponpow}(b),\ \mathrm{scale}>0. $$
`loc` shifts the distribution to start at `loc`. `scale` stretches the \(x\)-axis and rescales the density height by \(1/\mathrm{scale}\).
# Shape changes as b varies (standardized distribution)
bs = [0.5, 1.0, 2.0, 5.0]
x_max = float(exponpow_ppf(0.999, b=min(bs)))
x = np.linspace(0.0, x_max, 900)
fig = make_subplots(rows=1, cols=2, subplot_titles=["PDF", "CDF"])
for b in bs:
fig.add_trace(go.Scatter(x=x, y=exponpow_pdf(x, b=b), mode="lines", name=f"b={b}"), row=1, col=1)
fig.add_trace(go.Scatter(x=x, y=exponpow_cdf(x, b=b), mode="lines", showlegend=False), row=1, col=2)
fig.update_xaxes(title_text="x", row=1, col=1)
fig.update_xaxes(title_text="x", row=1, col=2)
fig.update_yaxes(title_text="density", row=1, col=1)
fig.update_yaxes(title_text="probability", row=1, col=2)
fig.update_layout(width=1050, height=380, legend_title_text="shape")
fig.show()
6) Derivations#
Expectation (and all raw moments)#
Start from the definition:
$$ \mathbb{E}[X^k] = \int_0^{\infty} x^k\,f(x;b)\,dx. $$
Use the substitution
$$ u = \exp(x^b)-1\quad\Rightarrow\quad du = b\,x^{b-1}\,\exp(x^b)\,dx. $$
Now rewrite the density times \(dx\):
$$ f(x;b)\,dx = \exp\bigl(1-\exp(x^b)\bigr)\,b\,x^{b-1}\,\exp(x^b)\,dx = \exp\bigl(-(\exp(x^b)-1)\bigr)\,du = e^{-u}\,du. $$
Also \(x^b=\log(1+u)\), so \(x=(\log(1+u))^{1/b}\). Therefore:
$$ \mathbb{E}[X^k] = \int_0^{\infty} \bigl(\log(1+u)\bigr)^{k/b}\,e^{-u}\,du. $$
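As a sanity check of the substitution (a sketch using the helpers defined earlier and scipy.integrate.quad), the direct integral of \(x^k f(x;b)\) should match the Exp-transform representation:
# Direct integral of x^k f(x;b) vs the Exp-transform integral used by exponpow_raw_moment
b_chk, k_chk = 1.6, 2
direct, _ = quad(lambda x: x**k_chk * float(exponpow_pdf(x, b=b_chk)), 0.0, np.inf, limit=200)
print("direct integral   :", direct)
print("Exp-transform quad:", exponpow_raw_moment(k_chk, b_chk))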
Variance#
Compute \(m_1=\mathbb{E}[X]\) and \(m_2=\mathbb{E}[X^2]\) using the integral above, then
$$ \mathrm{Var}(X) = m_2 - m_1^2. $$
Likelihood (i.i.d. sample)#
Let \(x_1,\dots,x_n\) be i.i.d. observations and define \(z_i=(x_i-\mathrm{loc})/\mathrm{scale}\).
The log-likelihood for parameters \((b,\mathrm{loc},\mathrm{scale})\) with \(b>0\) and \(\mathrm{scale}>0\) is
$$ \ell(b,\mathrm{loc},\mathrm{scale}) = \sum_{i=1}^n \log f_X(x_i) = n\,(1+\log b-\log\mathrm{scale}) + (b-1)\sum_{i=1}^n \log z_i + \sum_{i=1}^n z_i^b - \sum_{i=1}^n \exp(z_i^b), $$
with the **support constraint** \(z_i\ge 0\) for all \(i\) (otherwise the likelihood is 0).
Because of the \(\exp(z_i^b)\) term, maximizing this likelihood typically requires numerical optimization and careful handling of overflow/underflow (use logpdf).
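A minimal numerical-MLE sketch for the standard case (loc = 0 and scale = 1 assumed known), maximizing the log-likelihood with the NumPy-only logpdf above; scipy.optimize.minimize_scalar is an extra import not used elsewhere in this notebook:
# Numerical MLE of b alone (loc=0, scale=1 fixed), via the stable logpdf
from scipy.optimize import minimize_scalar

rng_mle = np.random.default_rng(11)  # separate generator so downstream cells are unaffected
x_mle = sample_exponpow(2_000, b=1.8, rng=rng_mle)

def neg_loglike(b):
    return -float(np.sum(exponpow_logpdf(x_mle, b=b)))

res = minimize_scalar(neg_loglike, bounds=(0.05, 20.0), method="bounded")
print("true b            :", 1.8)
print("MLE of b          :", float(res.x))
print("SciPy fit (b only):", exponpow_sp.fit(x_mle, floc=0.0, fscale=1.0))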
# Quick numerical sanity check: integrate PDF over a high-quantile range
b_check = 2.0
q = 0.999999
x_max = float(exponpow_ppf(q, b=b_check))
x = np.linspace(0.0, x_max, 500_000)
pdf = exponpow_pdf(x, b=b_check)
mass = float(np.trapz(pdf, x))
mean_trunc = float(np.trapz(x * pdf, x))
var_trunc = float(np.trapz((x - mean_trunc) ** 2 * pdf, x))
print("target mass ~", q)
print("mass (trapezoid)", mass)
print("mean (trunc) ", mean_trunc)
print("var (trunc) ", var_trunc)
print("mean (quad) ", exponpow_raw_moment(1, b_check))
print("var (quad) ", exponpow_raw_moment(2, b_check) - exponpow_raw_moment(1, b_check) ** 2)
target mass ~ 0.999999
mass (trapezoid) 0.9999989999982012
mean (trunc) 0.7157224426511368
var (trunc) 0.08408547601757835
mean (quad) 0.7157241036182413
var (quad) 0.08408636982305884
7) Sampling & simulation (NumPy-only)#
Algorithm (inverse transform via an exponential)#
Use the transform \(U=\exp(X^b)-1\).
Sample \(U\sim\mathrm{Exp}(1)\) using a uniform \(V\sim\mathrm{Uniform}(0,1)\):
$$ U = -\log(1-V). $$
Set \(Y=\log(1+U)\) (then \(Y\sim\mathrm{Gompertz}(1)\)).
Return \(X=Y^{1/b}\).
This is equivalent to using the analytic PPF.
n = 120_000
b_samp = 2.0
x = sample_exponpow(n, b=b_samp, rng=rng)
# Transform check: U = exp(X^b) - 1 should be Exp(1)
u = np.expm1(x**b_samp)
print("X mean ~", x.mean())
print("X var ~", x.var())
print("U mean ~", u.mean(), "(Exp(1) mean is 1)")
print("U var ~", u.var(), "(Exp(1) var is 1)")
# Equivalence check: PPF matches the Exp-transform when driven by the same Uniform(0,1)
q = rng.random(n)
x_ppf = exponpow_ppf(q, b=b_samp)
e = -np.log1p(-q)
x_transform = np.power(np.log1p(e), 1.0 / b_samp)
print("max |ppf - transform|:", float(np.max(np.abs(x_ppf - x_transform))))
X mean ~ 0.715674128103815
X var ~ 0.08409526226307099
U mean ~ 0.9996192849611022 (Exp(1) mean is 1)
U var ~ 0.9958703903250411 (Exp(1) var is 1)
max |ppf - transform|: 0.0
8) Visualization#
Below are:
the analytic PDF and CDF (NumPy-only implementations)
a Monte Carlo histogram overlaid with the PDF
b_vis = 2.0
x_max = float(exponpow_ppf(0.999, b=b_vis))
x_grid = np.linspace(0.0, x_max, 900)
pdf_grid = exponpow_pdf(x_grid, b=b_vis)
cdf_grid = exponpow_cdf(x_grid, b=b_vis)
samples = sample_exponpow(80_000, b=b_vis, rng=rng)
fig = make_subplots(rows=1, cols=3, subplot_titles=["PDF", "CDF", "Samples (hist) + PDF"])
fig.add_trace(go.Scatter(x=x_grid, y=pdf_grid, mode="lines", name="pdf"), row=1, col=1)
fig.add_trace(go.Scatter(x=x_grid, y=cdf_grid, mode="lines", name="cdf"), row=1, col=2)
fig.add_trace(
go.Histogram(x=samples, nbinsx=70, histnorm="probability density", name="samples", opacity=0.6),
row=1,
col=3,
)
fig.add_trace(go.Scatter(x=x_grid, y=pdf_grid, mode="lines", name="pdf"), row=1, col=3)
for c in [1, 2, 3]:
fig.update_xaxes(title_text="x", row=1, col=c)
fig.update_yaxes(title_text="density", row=1, col=1)
fig.update_yaxes(title_text="probability", row=1, col=2)
fig.update_yaxes(title_text="density", row=1, col=3)
fig.update_layout(width=1100, height=380, showlegend=False)
fig.show()
9) SciPy integration (scipy.stats.exponpow)#
scipy.stats.exponpow exposes the standardized distribution with a shape parameter b, plus loc and scale.
Common methods:
`exponpow_sp.pdf(x, b, loc=..., scale=...)`
`exponpow_sp.cdf(x, b, loc=..., scale=...)`
`exponpow_sp.rvs(b, loc=..., scale=..., size=..., random_state=...)`
`exponpow_sp.fit(data)` → estimates `(b, loc, scale)`
# Match our NumPy-only PDF/CDF to SciPy (standard case)
x = np.linspace(0.0, 2.0, 11)
b = 2.0
pdf_diff = np.max(np.abs(exponpow_pdf(x, b=b) - exponpow_sp.pdf(x, b)))
cdf_diff = np.max(np.abs(exponpow_cdf(x, b=b) - exponpow_sp.cdf(x, b)))
print("max |pdf - scipy|:", float(pdf_diff))
print("max |cdf - scipy|:", float(cdf_diff))
# Demonstrate rvs + fit on location-scale data
b_true, loc_true, scale_true = 2.3, -0.4, 1.7
data = exponpow_sp.rvs(b_true, loc=loc_true, scale=scale_true, size=2500, random_state=rng)
b_hat, loc_hat, scale_hat = exponpow_sp.fit(data)
print("\ntrue (b, loc, scale):", (b_true, loc_true, scale_true))
print("fit (b, loc, scale):", (b_hat, loc_hat, scale_hat))
# Visualize fitted vs true PDF
x_grid = np.linspace(np.min(data), np.quantile(data, 0.999), 700)
fig = go.Figure()
fig.add_trace(go.Histogram(x=data, nbinsx=70, histnorm="probability density", name="data", opacity=0.55))
fig.add_trace(
go.Scatter(x=x_grid, y=exponpow_sp.pdf(x_grid, b_true, loc=loc_true, scale=scale_true), mode="lines", name="true pdf")
)
fig.add_trace(
go.Scatter(x=x_grid, y=exponpow_sp.pdf(x_grid, b_hat, loc=loc_hat, scale=scale_hat), mode="lines", name="fit pdf")
)
fig.update_layout(width=950, height=420, title="SciPy fit: true vs fitted PDF")
fig.show()
max |pdf - scipy|: 0.0
max |cdf - scipy|: 1.1102230246251565e-16
true (b, loc, scale): (2.3, -0.4, 1.7)
fit (b, loc, scale): (2.272004091710933, -0.3759151362515315, 1.6789211052609123)
10) Statistical use cases#
Hypothesis testing#
A typical question is goodness-of-fit: does an exponpow model plausibly generate this data?
A common test statistic is the Kolmogorov–Smirnov (KS) distance. If you estimate parameters from the data (via fit) and then run a vanilla KS test, the p-value is not exact.
A practical workaround is a parametric bootstrap that repeats the fitting step on simulated data.
Bayesian modeling#
exponpow can be used as a likelihood/prior over nonnegative quantities. Because it has a single shape parameter \(b\), it is convenient to demonstrate a 1D grid posterior.
Generative modeling#
In simulation pipelines, exponpow provides a flexible way to generate nonnegative magnitudes with a controllable spike at 0 (via \(b\)) and a very light tail.
# A) Hypothesis testing: parametric bootstrap KS for fitted exponpow
def ks_statistic_to_fitted_exponpow(sample):
b_hat, loc_hat, scale_hat = exponpow_sp.fit(sample)
fitted = exponpow_sp(b_hat, loc=loc_hat, scale=scale_hat)
return stats.kstest(sample, fitted.cdf).statistic
n = 350
b_true, loc_true, scale_true = 2.0, 0.3, 1.1
x_obs = exponpow_sp.rvs(b_true, loc=loc_true, scale=scale_true, size=n, random_state=rng)
D_obs = ks_statistic_to_fitted_exponpow(x_obs)
B = 250 # keep modest for notebook runtime
b_hat, loc_hat, scale_hat = exponpow_sp.fit(x_obs)
fitted = exponpow_sp(b_hat, loc=loc_hat, scale=scale_hat)
Ds = np.empty(B)
for j in range(B):
sim = fitted.rvs(size=n, random_state=rng)
Ds[j] = ks_statistic_to_fitted_exponpow(sim)
p_boot = (np.sum(Ds >= D_obs) + 1) / (B + 1)
print("KS statistic (observed):", D_obs)
print("bootstrap p-value :", p_boot)
KS statistic (observed): 0.024309941278901404
bootstrap p-value : 0.9043824701195219
# B) Bayesian modeling: grid posterior for b (loc/scale assumed known)
b_true = 2.0
x_obs = sample_exponpow(140, b=b_true, rng=rng) # standard (loc=0, scale=1)
grid = np.linspace(0.25, 6.0, 800)
# Log-likelihood under our NumPy-only logpdf
loglike = np.array([exponpow_logpdf(x_obs, b=b).sum() for b in grid])
# A simple (improper) log-uniform prior: p(b) ∝ 1/b over the grid
logprior = -np.log(grid)
logpost = loglike + logprior
logpost -= logpost.max() # stabilize
post = np.exp(logpost)
post /= np.trapz(post, grid)
b_map = float(grid[np.argmax(post)])
fig = go.Figure()
fig.add_trace(go.Scatter(x=grid, y=post, mode="lines", name="posterior"))
fig.add_vline(x=b_true, line_dash="dash", line_color="black", annotation_text="true b")
fig.add_vline(x=b_map, line_dash="dot", line_color="red", annotation_text="MAP")
fig.update_layout(width=950, height=380, title="Posterior over b (standard case)", xaxis_title="b", yaxis_title="density")
fig.show()
print("true b:", b_true)
print("MAP b :", b_map)
true b: 2.0
MAP b : 2.0923028785982476
# C) Generative modeling: 2D radial noise with exponpow-distributed radius
def radial_noise(n: int, b: float, scale: float, rng=None):
if rng is None:
rng = np.random.default_rng()
theta = rng.uniform(0.0, 2 * np.pi, size=n)
r = sample_exponpow(n, b=b, scale=scale, rng=rng)
x = r * np.cos(theta)
y = r * np.sin(theta)
return x, y, r
n = 3000
scale = 0.35
b_small, b_large = 0.5, 5.0
x1, y1, r1 = radial_noise(n, b=b_small, scale=scale, rng=rng)
x2, y2, r2 = radial_noise(n, b=b_large, scale=scale, rng=rng)
fig = make_subplots(
rows=2,
cols=2,
subplot_titles=[f"scatter (b={b_small})", f"scatter (b={b_large})", "radius histogram", ""],
row_heights=[0.7, 0.3],
)
fig.add_trace(
go.Scatter(x=x1, y=y1, mode="markers", marker=dict(size=3, opacity=0.45), name=f"b={b_small}"),
row=1,
col=1,
)
fig.add_trace(
go.Scatter(x=x2, y=y2, mode="markers", marker=dict(size=3, opacity=0.45), name=f"b={b_large}"),
row=1,
col=2,
)
fig.add_trace(
go.Histogram(x=r1, nbinsx=60, histnorm="probability density", opacity=0.55, name=f"b={b_small}"),
row=2,
col=1,
)
fig.add_trace(
go.Histogram(x=r2, nbinsx=60, histnorm="probability density", opacity=0.55, name=f"b={b_large}"),
row=2,
col=1,
)
fig.update_xaxes(title_text="x", row=1, col=1)
fig.update_yaxes(title_text="y", row=1, col=1)
fig.update_xaxes(title_text="x", row=1, col=2)
fig.update_yaxes(title_text="y", row=1, col=2)
fig.update_xaxes(title_text="radius", row=2, col=1)
fig.update_yaxes(title_text="density", row=2, col=1)
fig.update_layout(width=1050, height=750, title="Exponpow as a generative prior for nonnegative magnitudes")
fig.show()
11) Pitfalls#
**Parameter validity:** `b` must be strictly positive; `scale` must be strictly positive.
**Support:** in the location–scale form, the support is \(x\ge\mathrm{loc}\). For \(x<\mathrm{loc}\), the PDF is 0 and `logpdf` is \(-\infty\).
**Naming collision:** SciPy’s `exponpow` is not the symmetric generalized-normal “exponential power” distribution.
**Overflow/underflow:** terms like \(\exp(x^b)\) explode quickly; for large \(x\) use `logpdf` rather than `pdf`.
**Zeros in data:** if \(0<b<1\), the density diverges at 0; exact zeros (from rounding/thresholding) can strongly affect fitting.
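A tiny illustration of the overflow/underflow pitfall (a sketch; the exact values depend on b): for moderately large arguments the PDF underflows to exactly 0, while the log-PDF stays finite and remains usable in a log-likelihood:
# pdf underflows to 0 long before logpdf stops being informative
b_demo = 3.0
x_demo = np.array([3.0, 4.0, 5.0])
print("pdf   :", exponpow_sp.pdf(x_demo, b_demo))
print("logpdf:", exponpow_sp.logpdf(x_demo, b_demo))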
12) Summary#
`exponpow` is a continuous distribution on \([0,\infty)\) with a single shape parameter \(b\).
The survival function is \(S(x)=\exp(1-\exp(x^b))\), giving an extremely light tail and an increasing hazard.
The transform \(U=\exp(X^b)-1\) yields \(U\sim\mathrm{Exp}(1)\), which provides:
a clean NumPy-only sampler
stable integral formulas for moments, MGF/CF, and entropy
SciPy provides `scipy.stats.exponpow` for evaluation, sampling, and fitting.